# Load the raw experiment data; readr guesses column types (spec echoed below).
d <- read_csv(data_path)
##
## ── Column specification ────────────────────────────────────────────────────────
## cols(
## .default = col_character(),
## trial_index = col_double(),
## time_elapsed = col_double(),
## workerId = col_logical(),
## hitId = col_logical(),
## assignmentId = col_logical(),
## experiment_id = col_double(),
## survey_code = col_double(),
## seed = col_double(),
## success = col_logical(),
## timeout = col_logical(),
## rt = col_double(),
## start_time = col_double(),
## end_time = col_double(),
## choice_index = col_double(),
## reward_score = col_double(),
## reward = col_double(),
## reward_score_unadjusted = col_double(),
## score_after_trial = col_double(),
## slider_start = col_double(),
## n = col_double()
## )
## ℹ Use `spec()` for the full column specifications.
# Drop the reward-reveal ("show-reward") screens so only decision-relevant
# trials remain.
d <- d %>%
  filter(!trial_type %in% "show-reward")
# Adding columns to characterize participant choices.
# Add trial-numbering and condition-recoding columns used throughout the
# analysis.
d <- d %>%
  mutate(
    # Collapse the raw jsPsych trial_index into a continuous trial_number.
    # NOTE(review): the cutoffs (8, 199) and offsets (7, /2, -96) encode the
    # experiment's trial layout — confirm against the experiment script.
    trial_number = case_when(
      trial_index<8 ~ trial_index,
      trial_index<199 ~ 7+(trial_index-7)/2,
      TRUE ~ trial_index-96
    )
  ) %>%
  relocate(trial_number,.after=trial_index) %>%
  mutate(
    # Test trials are trial_number 7..102, renumbered 1..96; NA elsewhere.
    test_trial_number = case_when(
      trial_number<7 ~ NA_real_,
      trial_number<103 ~ trial_number-6,
      TRUE ~ NA_real_
    )
  ) %>%
  relocate(test_trial_number,.after=trial_number) %>%
  mutate(
    # Within-block trial number: test trials 1-48 keep their number (block 1);
    # trials 49-96 restart at 1 (block 2).
    block_trial_number = case_when(
      test_trial_number < 49 ~ test_trial_number,
      TRUE ~ test_trial_number - 48),
    # Centered version (mean of 1..48 is 24.5).
    block_trial_number_c = block_trial_number - 24.5
  ) %>%
  relocate(block_trial_number,.after=test_trial_number) %>%
  relocate(block_trial_number_c,.after=block_trial_number) %>%
  mutate(
    # 8-trial mini-blocks 1..12 across the 96 test trials; NA on non-test rows
    # (an NA condition in case_when falls through to the next clause).
    explore_block = case_when(
      test_trial_number<9 ~ 1,
      test_trial_number<17 ~ 2,
      test_trial_number<25 ~ 3,
      test_trial_number<33 ~ 4,
      test_trial_number < 41 ~ 5,
      test_trial_number < 49 ~ 6,
      test_trial_number < 57 ~ 7,
      test_trial_number<65 ~ 8,
      test_trial_number<73 ~ 9,
      test_trial_number<81 ~ 10,
      test_trial_number <89 ~ 11,
      test_trial_number <97 ~ 12,
      TRUE ~ NA_real_
    )
  ) %>%
  mutate(
    # 1 when the participant chose the 8-point (max reward) option, 0 on any
    # other test-trial choice, NA outside test trials.
    max_reward_choice = case_when(
      reward_score_unadjusted ==8 ~ 1,
      !is.na(test_trial_number) ~ 0,
      TRUE ~ NA_real_
    )
  ) %>%
  mutate(
    # Structure condition currently in effect: in block 1 (and throughout for
    # "match" participants) it is the assigned condition; in block 2 the
    # "mismatch" participants are flipped to the other structure.
    cur_structure_condition=case_when(
      test_trial_number < 49 ~ structure_condition,
      !is.na(test_trial_number) & match_condition == "match" ~ structure_condition,
      test_trial_number >= 49 & structure_condition == "emotion" ~ "model",
      test_trial_number >= 49 & structure_condition == "model" ~ "emotion"
    )
  ) %>%
  # Block 1 = learning (test trials 1-48), block 2 = generalization (49-96).
  mutate(block = case_when(
    test_trial_number < 49 ~ 1,
    test_trial_number >= 49 ~ 2,
    TRUE ~ NA_real_
  ))
# Recenter / recode the condition variables for modeling: +/-0.5 effect codes
# for main models, and 0/-1/+1 dummy codes for simple-effect recentering.
d <- d %>%
  mutate(
    structure_condition_c = case_when(
      structure_condition == "model" ~ -0.5,
      structure_condition == "emotion" ~ 0.5),
    cur_structure_condition_c = case_when(
      cur_structure_condition == "model" ~ -0.5,
      cur_structure_condition == "emotion" ~ 0.5),
    match_condition_c = case_when(
      match_condition == "match" ~ 0.5,
      match_condition == "mismatch" ~ -0.5
    ),
    # Dummy codes placing 0 on the named reference level (for recentered fits).
    cur_structure_condition_model = case_when(
      cur_structure_condition == "model" ~ 0,
      cur_structure_condition == "emotion" ~ 1),
    cur_structure_condition_emotion = case_when(
      cur_structure_condition == "model" ~ -1,
      cur_structure_condition == "emotion" ~ 0),
    match_condition_match = case_when(
      match_condition == "match" ~ 0,
      match_condition == "mismatch" ~ -1
    ),
    match_condition_mismatch = case_when(
      match_condition == "match" ~ 1,
      match_condition == "mismatch" ~ 0
    ),
    # NOTE(review): unlike `block` above, the TRUE branch here also assigns
    # 0.5 to non-test rows (NA test_trial_number) instead of NA — confirm this
    # is intended (models filter on max_reward_choice being non-NA, so it may
    # be harmless).
    block_c = case_when(
      test_trial_number < 49 ~ -0.5,
      TRUE ~ 0.5
    ),
    block_learn = case_when(
      block==1 ~ 0,
      block==2 ~ 1
    ),
    block_gen = case_when(
      block==1 ~ -1,
      block==2 ~ 0
    )
  )
# Free-response answers come from the final survey trial (index 206); split
# the raw JSON-ish response string into its three text fields. The capture
# groups grab the text between the escaped-quote delimiters following the
# "patterns", "strategy", and "comments" keys.
open_resps <- d %>%
  filter(trial_index %in% 206) %>%
  select(subject, structure_condition, match_condition ,response) %>%
  extract(response, into = c("patterns", "strategy", "comments"),
          regex = "patterns\":\"(.*)\",\"strategy\":\"(.*)\",\"comments\":\"(.*)")
#write_csv(open_resps, here("data-analysis","data","v1","processed","emogo-v1-openresponses.csv"))
# Score the two attention-check trials (indices 4 and 5). Each trial has one
# correct image; avg_correct is each participant's proportion correct.
attention_check <- d %>%
  filter(trial_index %in% c(4, 5)) %>%
  mutate(
    attention_check_correct_choice = case_when(
      trial_index == 4 ~ "stimuli/horse.jpg",
      trial_index == 5 ~ "stimuli/hammer.jpg"
    ),
    # TRUE/FALSE comparison coerced to 1/0
    check_correct = as.numeric(attention_check_correct_choice == choiceImage)
  ) %>%
  group_by(subject) %>%
  summarize(
    N = n(),
    avg_correct = mean(check_correct)
  )
# Participants who answered every attention check correctly.
passed_attention_check <-
  attention_check$subject[which(attention_check$avg_correct == 1)]
# Session duration per participant: time_elapsed (ms) at the final trial,
# converted to minutes.
total_time <- d %>%
  filter(trial_index == 206) %>%
  distinct(subject, time_elapsed) %>%
  mutate(time_mins = time_elapsed / 1000 / 60)
# Minimum completion time (minutes)
min(total_time$time_mins)
## [1] 6.2892
#Any subjects with times under 4 minutes?
subjects_too_fast <- total_time %>%
filter(time_mins<4)
subjects_too_fast %>%
pull(subject)
## character(0)
# Proportion of test-trial choices at each screen location per participant
# (used to flag participants who perseverate on one location).
percent_location_selections <- d %>%
  filter(!is.na(test_trial_number)) %>%
  group_by(subject,choiceLocation) %>%
  summarise(n = n()) %>%
  # summarise() drops the last grouping var, so freq is computed within
  # subject (result remains grouped by subject).
  mutate(freq = n / sum(n))
## `summarise()` has grouped output by 'subject'. You can override using the
## `.groups` argument.
#any frequencies above 80%?
subjects_same_location_exclusion <- percent_location_selections %>%
filter(freq>0.8)
subjects_same_location_exclusion %>%
distinct(subject) %>%
pull(subject)
## [1] "p92882"
# Final scores (score_after_trial at test trial 96) for participants who
# passed the attention check; histogram marks the median-split point.
reward_rank <- d %>%
  filter(subject %in% passed_attention_check,
         test_trial_number == 96) %>%
  select(subject, structure_condition, match_condition, score_after_trial)
median_score <- median(reward_rank$score_after_trial)
ggplot(reward_rank, aes(x = score_after_trial)) +
  geom_histogram() +
  geom_vline(xintercept = median_score)
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
# Score distributions by match condition within each structure condition.
ggplot(reward_rank, aes(score_after_trial, color = match_condition)) +
  geom_density() +
  facet_wrap(~structure_condition)
# Participants scoring at or above the median ("top 50%"), written out for
# follow-up analyses, plus their distribution over conditions.
subjects_top_50 <- reward_rank %>%
  filter(score_after_trial >= median_score) %>%
  select(subject)
write_csv(subjects_top_50,
          here("data-analysis","data","v1","processed","subjects_top_50.csv"))
conditions_top_50 <- reward_rank %>%
  filter(subject %in% subjects_top_50$subject) %>%
  group_by(structure_condition, match_condition) %>%
  tally()
print(conditions_top_50)
## # A tibble: 4 × 3
## # Groups: structure_condition [2]
## structure_condition match_condition n
## <chr> <chr> <int>
## 1 emotion match 10
## 2 emotion mismatch 11
## 3 model match 20
## 4 model mismatch 7
# Exclude any participants who meet exclusion criteria.
# BUG FIX: subjects_same_location_exclusion and subjects_too_fast are data
# frames, and `subject %in% <data.frame>` matches against the data frame's
# *columns* (list elements), which never equal a scalar subject id — so both
# exclusions were silently no-ops (e.g. "p92882" flagged above was never
# removed, leaving N = 95). Compare against the subject column instead.
# NOTE: downstream counts/models will change once the exclusion takes effect.
d <- d %>%
  filter(subject %in% passed_attention_check) %>%
  filter(!(subject %in% subjects_same_location_exclusion$subject)) %>%
  filter(!(subject %in% subjects_too_fast$subject))
# Remaining participants per condition
d %>%
  distinct(subject,structure_condition,match_condition) %>%
  group_by(structure_condition,match_condition) %>%
  tally()
## # A tibble: 4 × 3
## # Groups: structure_condition [2]
## structure_condition match_condition n
## <chr> <chr> <int>
## 1 emotion match 25
## 2 emotion mismatch 23
## 3 model match 25
## 4 model mismatch 22
# Per-participant proportion of reward-maximizing choices within each 8-trial
# exploration block.
subject_by_block <- d %>%
  filter(!is.na(explore_block)) %>%
  group_by(subject,match_condition,structure_condition,explore_block) %>%
  summarize(
    max_choice_percent=mean(max_reward_choice)
  )
## `summarise()` has grouped output by 'subject', 'match_condition',
## 'structure_condition'. You can override using the `.groups` argument.
# Group mean accuracy per block with SE and 95% t-based CI across participants.
summarize_by_block <- subject_by_block %>%
  group_by(explore_block) %>%
  summarize(
    N=n(),
    max_choice = mean(max_choice_percent),
    # NOTE(review): N counts all rows; if max_choice_percent ever contains
    # NAs, the na.rm'd var/sd would be divided by an unfiltered N — confirm
    # no NAs reach this point.
    se = sqrt(var(max_choice_percent, na.rm = TRUE)/N),
    ci=qt(0.975, N-1)*sd(max_choice_percent,na.rm=TRUE)/sqrt(N),
    lower_ci=max_choice-ci,
    upper_ci=max_choice+ci,
    lower_se=max_choice-se,
    upper_se=max_choice+se
  )
# Same block-level summary, split by match and structure condition.
summarize_by_block_by_condition <- subject_by_block %>%
  group_by(match_condition,structure_condition,explore_block) %>%
  summarize(
    N=n(),
    max_choice = mean(max_choice_percent),
    # SE and 95% t-based CI across participants within each condition cell.
    se = sqrt(var(max_choice_percent, na.rm = TRUE)/N),
    ci=qt(0.975, N-1)*sd(max_choice_percent,na.rm=TRUE)/sqrt(N),
    lower_ci=max_choice-ci,
    upper_ci=max_choice+ci,
    lower_se=max_choice-se,
    upper_se=max_choice+se
  )
## `summarise()` has grouped output by 'match_condition', 'structure_condition'.
## You can override using the `.groups` argument.
# Per-trial proportion of choices at each reward level (8/6/4/2) by condition,
# reshaped to long format for plotting.
summarize_choice_by_trial <- d %>%
  filter(!is.na(test_trial_number)) %>%
  group_by(structure_condition, match_condition,test_trial_number) %>%
  summarize(
    N=n(),
    reward_8 = mean(reward_score_unadjusted==8),
    reward_6 = mean(reward_score_unadjusted==6),
    reward_4 = mean(reward_score_unadjusted==4),
    reward_2 = mean(reward_score_unadjusted==2)
  ) %>%
  pivot_longer(cols = c(reward_8,reward_6,reward_4,reward_2),names_to = "reward",values_to = "percent_choice",names_prefix="reward_")
## `summarise()` has grouped output by 'structure_condition', 'match_condition'.
## You can override using the `.groups` argument.
# Same reward-level choice proportions, aggregated by 8-trial block instead
# of by trial.
summarize_choice_by_block <- d %>%
  filter(!is.na(test_trial_number)) %>%
  group_by(structure_condition, match_condition,explore_block) %>%
  summarize(
    N=n(),
    reward_8 = mean(reward_score_unadjusted==8),
    reward_6 = mean(reward_score_unadjusted==6),
    reward_4 = mean(reward_score_unadjusted==4),
    reward_2 = mean(reward_score_unadjusted==2)
  ) %>%
  pivot_longer(cols = c(reward_8,reward_6,reward_4,reward_2),names_to = "reward",values_to = "percent_choice",names_prefix="reward_")
## `summarise()` has grouped output by 'structure_condition', 'match_condition'.
## You can override using the `.groups` argument.
# Per-participant reward-maximizing (8-point) choice rate within each block,
# with a readable block label for plotting.
summarize_choice_by_subject <- d %>%
  filter(!is.na(test_trial_number)) %>%
  group_by(subject,cur_structure_condition, match_condition,block) %>%
  summarize(
    N=n(),
    reward_8 = mean(reward_score_unadjusted==8)
  ) %>%
  mutate(
    block_name = case_when(
      block==2 ~ "generalization block",
      block==1~"learning block"
    )
  )
## `summarise()` has grouped output by 'subject', 'cur_structure_condition',
## 'match_condition'. You can override using the `.groups` argument.
# Order block levels so "learning block" facets appear first.
summarize_choice_by_subject <- summarize_choice_by_subject %>%
  mutate(block_name = factor(block_name,
                             levels = c("learning block", "generalization block")))
# Spaghetti plot: faint per-participant trajectories with the group mean
# (+/- SE) overlaid. Dotted line marks the learning -> generalization
# boundary (block 6.5); dashed line marks chance (1 of 4 options).
ggplot(subject_by_block,aes(explore_block,max_choice_percent))+
  geom_point(size=1.5,alpha=0.1,aes(group=subject))+
  geom_line(alpha=0.1,aes(group=subject))+
  geom_point(data=summarize_by_block,aes(y=max_choice),size=2,color="black")+
  # FIX: `size` for line geoms is deprecated since ggplot2 3.4.0 (see the
  # captured warning); use `linewidth` instead.
  geom_line(data=summarize_by_block,aes(y=max_choice),linewidth=1.2,color="black")+
  geom_errorbar(data=summarize_by_block,aes(y=max_choice,ymin=lower_se,ymax=upper_se),width=0,color="black")+
  geom_vline(xintercept=6.5,linetype="dotted")+
  geom_hline(yintercept=0.25,linetype="dashed")+
  theme(legend.position="none")+
  scale_x_continuous(breaks=seq(1,12))+
  xlab("Block (8 Trials = 1 Block)")+
  ylab("Percent reward-maximizing choices")
## Warning: Using `size` aesthetic for lines was deprecated in ggplot2 3.4.0.
## Warning: Please use `linewidth` instead.
# Condition-level mean accuracy by block. The duplicated point/line layers
# (faint + emphasized) reproduce the original figure's layering.
ggplot(summarize_by_block_by_condition,aes(explore_block,max_choice, color=structure_condition,shape=match_condition,linetype=match_condition))+
  geom_point(size=1.5,alpha=0.5)+
  geom_line(alpha=0.5)+
  geom_point(aes(y=max_choice),size=2)+
  # FIX: `size` for line geoms is deprecated since ggplot2 3.4.0; use
  # `linewidth` instead.
  geom_line(aes(y=max_choice),linewidth=1.2)+
  geom_errorbar(aes(y=max_choice,ymin=lower_se,ymax=upper_se),width=0)+
  geom_vline(xintercept=6.5,linetype="dotted")+
  geom_hline(yintercept=0.25,linetype="dashed")+
  #theme(legend.position="none")+
  scale_x_continuous(breaks=seq(1,12))+
  xlab("Block (8 Trials = 1 Block)")+
  ylab("Percent reward-maximizing choices")
# Per-participant trajectories faceted by condition cell.
ggplot(subject_by_block,aes(explore_block,max_choice_percent, group=subject))+
  #geom_point(size=1.5,alpha=0.5)+
  geom_line(alpha=0.5)+
  geom_vline(xintercept=6.5,linetype="dotted")+
  geom_hline(yintercept=0.25,linetype="dashed")+
  theme(legend.position="none")+
  facet_wrap(~structure_condition+match_condition)+
  scale_x_continuous(breaks=seq(1,12))+
  xlab("Block (8 Trials = 1 Block)")+
  ylab("Percent reward-maximizing choices")
# Choice proportions for each reward level across trials, by condition cell.
ggplot(summarize_choice_by_trial,aes(test_trial_number,percent_choice,color=reward))+
  geom_point()+
  geom_line()+
  geom_vline(xintercept=48.5,linetype="dotted")+
  geom_hline(yintercept=0.25,linetype="dashed")+
  xlab("Trial Number")+
  ylab("Percent choices")+
  facet_wrap(~structure_condition+match_condition)
# Choice proportions for each reward level across 8-trial blocks, by cell.
ggplot(summarize_choice_by_block,aes(explore_block,percent_choice,color=reward))+
  geom_point()+
  geom_line()+
  geom_vline(xintercept=6.5,linetype="dotted")+
  geom_hline(yintercept=0.25,linetype="dashed")+
  xlab("Block (8 Trials = 1 Block)")+
  ylab("Percent choices")+
  facet_wrap(~structure_condition+match_condition)
# Subject-level reward-maximizing rates by current structure condition,
# faceted by match condition and block; dashed line = chance (0.25).
ggplot(summarize_choice_by_subject,aes(cur_structure_condition,reward_8,color = cur_structure_condition))+
  geom_boxplot()+
  geom_jitter(width=0.1)+
  geom_hline(yintercept=0.25,linetype="dashed")+
  xlab("Structure Condition")+
  ylab("Percent Reward-Maximizing Choices")+
  facet_wrap(~match_condition+block_name)+
  theme_cowplot()+
  theme(legend.position="none")
ggsave(here(figure_path,"overall_reward_maximizing.png"),width=6,height=6)
#### Pruning random effects structure
# Maximal model: full 4-way fixed-effect interaction (structure x match x
# block x within-block trial, all centered), by-subject random slopes for
# block and trial, and a random intercept per chosen image. bobyqa with a
# high maxfun to aid convergence.
m <- glmer(max_reward_choice ~ cur_structure_condition_c*match_condition_c*block_c*block_trial_number_c + (1+block_c*block_trial_number_c|subject)+(1|choiceImage),data=d, family=binomial,glmerControl(optimizer="bobyqa",optCtrl=list(maxfun=100000)))
summary(m)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: max_reward_choice ~ cur_structure_condition_c * match_condition_c *
## block_c * block_trial_number_c + (1 + block_c * block_trial_number_c |
## subject) + (1 | choiceImage)
## Data: d
## Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))
##
## AIC BIC logLik deviance df.resid
## 7514.5 7706.7 -3730.2 7460.5 9093
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -37.474 -0.377 0.048 0.364 10.021
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## choiceImage (Intercept) 5.201954 2.28078
## subject (Intercept) 3.281357 1.81145
## block_c 7.979980 2.82489 0.00
## block_trial_number_c 0.003100 0.05568 0.91 0.05
## block_c:block_trial_number_c 0.004855 0.06968 0.21 0.64 0.21
## Number of obs: 9120, groups: choiceImage, 248; subject, 95
##
## Fixed effects:
## Estimate
## (Intercept) -0.0690312
## cur_structure_condition_c -0.5765180
## match_condition_c 0.4735088
## block_c -0.0448018
## block_trial_number_c 0.0574699
## cur_structure_condition_c:match_condition_c -1.1480656
## cur_structure_condition_c:block_c -1.5073018
## match_condition_c:block_c 1.6953327
## cur_structure_condition_c:block_trial_number_c -0.0300980
## match_condition_c:block_trial_number_c 0.0076376
## block_c:block_trial_number_c -0.0008284
## cur_structure_condition_c:match_condition_c:block_c -1.1136973
## cur_structure_condition_c:match_condition_c:block_trial_number_c -0.0645662
## cur_structure_condition_c:block_c:block_trial_number_c -0.0437226
## match_condition_c:block_c:block_trial_number_c 0.0466897
## cur_structure_condition_c:match_condition_c:block_c:block_trial_number_c -0.0111552
## Std. Error
## (Intercept) 0.2442057
## cur_structure_condition_c 0.3435170
## match_condition_c 0.3824405
## block_c 0.3085726
## block_trial_number_c 0.0064519
## cur_structure_condition_c:match_condition_c 0.6853306
## cur_structure_condition_c:block_c 0.6947806
## match_condition_c:block_c 0.6059410
## cur_structure_condition_c:block_trial_number_c 0.0110312
## match_condition_c:block_trial_number_c 0.0127911
## block_c:block_trial_number_c 0.0093105
## cur_structure_condition_c:match_condition_c:block_c 1.3825534
## cur_structure_condition_c:match_condition_c:block_trial_number_c 0.0220394
## cur_structure_condition_c:block_c:block_trial_number_c 0.0223865
## match_condition_c:block_c:block_trial_number_c 0.0182628
## cur_structure_condition_c:match_condition_c:block_c:block_trial_number_c 0.0447065
## z value
## (Intercept) -0.283
## cur_structure_condition_c -1.678
## match_condition_c 1.238
## block_c -0.145
## block_trial_number_c 8.907
## cur_structure_condition_c:match_condition_c -1.675
## cur_structure_condition_c:block_c -2.169
## match_condition_c:block_c 2.798
## cur_structure_condition_c:block_trial_number_c -2.728
## match_condition_c:block_trial_number_c 0.597
## block_c:block_trial_number_c -0.089
## cur_structure_condition_c:match_condition_c:block_c -0.806
## cur_structure_condition_c:match_condition_c:block_trial_number_c -2.930
## cur_structure_condition_c:block_c:block_trial_number_c -1.953
## match_condition_c:block_c:block_trial_number_c 2.557
## cur_structure_condition_c:match_condition_c:block_c:block_trial_number_c -0.250
## Pr(>|z|)
## (Intercept) 0.77742
## cur_structure_condition_c 0.09329
## match_condition_c 0.21567
## block_c 0.88456
## block_trial_number_c < 2e-16
## cur_structure_condition_c:match_condition_c 0.09390
## cur_structure_condition_c:block_c 0.03005
## match_condition_c:block_c 0.00514
## cur_structure_condition_c:block_trial_number_c 0.00636
## match_condition_c:block_trial_number_c 0.55044
## block_c:block_trial_number_c 0.92910
## cur_structure_condition_c:match_condition_c:block_c 0.42051
## cur_structure_condition_c:match_condition_c:block_trial_number_c 0.00339
## cur_structure_condition_c:block_c:block_trial_number_c 0.05081
## match_condition_c:block_c:block_trial_number_c 0.01057
## cur_structure_condition_c:match_condition_c:block_c:block_trial_number_c 0.80296
##
## (Intercept)
## cur_structure_condition_c .
## match_condition_c
## block_c
## block_trial_number_c ***
## cur_structure_condition_c:match_condition_c .
## cur_structure_condition_c:block_c *
## match_condition_c:block_c **
## cur_structure_condition_c:block_trial_number_c **
## match_condition_c:block_trial_number_c
## block_c:block_trial_number_c
## cur_structure_condition_c:match_condition_c:block_c
## cur_structure_condition_c:match_condition_c:block_trial_number_c **
## cur_structure_condition_c:block_c:block_trial_number_c .
## match_condition_c:block_c:block_trial_number_c *
## cur_structure_condition_c:match_condition_c:block_c:block_trial_number_c
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation matrix not shown by default, as p = 16 > 12.
## Use print(x, correlation=TRUE) or
## vcov(x) if you need it
# Create a full design grid and compute population-level (fixed-effects-only)
# predicted probabilities with standard errors from the maximal model.
pX <- expand.grid(
  cur_structure_condition_c=c(-0.5,0.5),
  match_condition_c=c(-0.5,0.5),
  block_c=c(-0.5,0.5),
  block_trial_number_c=seq(-23.5,23.5,by=1))
# NOTE(review): predictSE() appears to come from AICcmodavg; confirm that it
# accepts `re.form` — its documented arguments are se.fit/level. Intent here
# is clearly to ignore random effects.
predictions <- predictSE(m,pX,re.form=NA, type="response")
pX$fit <- predictions$fit
pX$se.fit <- predictions$se.fit
# Map the centered codes back to readable labels for plotting.
pX <- pX %>%
  mutate(
    block_trial_number = block_trial_number_c+24.5,
    cur_structure_condition = case_when(
      cur_structure_condition_c==0.5 ~ "emotion",
      cur_structure_condition_c==-0.5 ~ "model"
    ),
    match_condition=case_when(
      match_condition_c==0.5 ~ "match",
      match_condition_c==-0.5 ~ "mismatch"
    ),
    block_name = case_when(
      block_c==0.5 ~ "generalization block",
      block_c==-0.5~"learning block"
    )
  ) %>%
  mutate(
    block_name=factor(block_name,levels=c("learning block","generalization block"))
  )
# Label blocks for plotting, ordered learning -> generalization.
d <- d %>%
  mutate(
    block_name = factor(
      case_when(
        block == 1 ~ "learning block",
        block == 2 ~ "generalization block"
      ),
      levels = c("learning block", "generalization block")
    )
  )
# Raw 0/1 choices (jittered points), count-scaled horizontal violins per
# structure condition, and model-predicted probabilities (fit +/- SE)
# overlaid. Because the y axis is the discrete factor levels "0".."1",
# probabilities are mapped onto it via y*4+1 (level 1 = "0", level 5 = "1");
# the chance line (0.25) is mapped the same way.
# NOTE(review): geom_violinh is not base ggplot2 (likely ggstance or a local
# helper) — confirm it is attached.
# NOTE(review): the brewer scales use breaks=c(0.5,-0.5), but the mapped
# variable is the character cur_structure_condition ("emotion"/"model"), so
# these breaks cannot match and legend keys are likely dropped — the
# generalization-only figure further down uses the character levels instead.
p <- ggplot(subset(d,!is.na(block_trial_number)),aes(block_trial_number,as.factor(max_reward_choice),color=cur_structure_condition))+
  geom_point(size = 0.5, alpha=0.2,shape=19,position = position_jitterdodge(jitter.width = 0.05,jitter.height = 0.5,dodge.width = 0.2,seed = 1))+
  geom_violinh(data=subset(d,!is.na(block_trial_number)&cur_structure_condition_c==-0.5),aes(fill=cur_structure_condition),position = position_nudge(x = 0, y = .3 ),scale="count",width=0.4,alpha=0.3,color=NA)+
  geom_violinh(data=subset(d,!is.na(block_trial_number)&cur_structure_condition_c==0.5),aes(fill=cur_structure_condition),position = position_nudge(x = 0, y = -.3 ),scale="count",width=0.4,alpha=0.3,color=NA)+
  geom_hline(yintercept=0.25*4+1,linetype="dashed")+
  geom_smooth(data=pX,aes(y=fit*4+1,ymax=(fit+se.fit)*4+1,ymin=(fit-se.fit)*4+1,fill=cur_structure_condition),stat="identity")+
  theme_classic(base_size=18)+
  ylab("Probability of \nreward-maximizing choice")+
  scale_color_brewer(
    palette="Set1",
    name="Structure Condition",
    breaks=c(0.5,-0.5),
    labels=c("Emotion","Model"))+
  scale_fill_brewer(
    palette="Set1",
    name="Structure Condition",
    breaks=c(0.5,-0.5),
    labels=c("Emotion","Model"))+
  scale_y_discrete(limits=c("0","0.25","0.5","0.75","1"))+
  xlab("Block Trial Number")+
  facet_wrap(~match_condition+block_name)+
  theme(legend.position=c(0.4,0.4))
p
## Warning: Using the `size` aesthietic with geom_polygon was deprecated in ggplot2
## 3.4.0.
## Warning: Please use the `linewidth` aesthetic instead.
ggsave(here(figure_path,"model_predictions.png"),width=9,height=6)
# Same raw-data + model-prediction figure, restricted to the generalization
# block (block 2), with correctly labeled brewer scales (character breaks).
# Probabilities are again mapped onto the discrete y levels via y*4+1.
p <- ggplot(subset(d,!is.na(block_trial_number)&block==2),aes(block_trial_number,as.factor(max_reward_choice),color=cur_structure_condition))+
  geom_point(size = 0.5, alpha=0.1,shape=19,position = position_jitterdodge(jitter.width = 0.05,jitter.height = 0.5,dodge.width = 0.2,seed = 1))+
  geom_violinh(data=subset(d,!is.na(block_trial_number)&cur_structure_condition_c==-0.5&block==2),aes(fill=cur_structure_condition),position = position_nudge(x = 0, y = .3 ),scale="count",width=0.4,alpha=0.2,color=NA)+
  geom_violinh(data=subset(d,!is.na(block_trial_number)&cur_structure_condition_c==0.5&block==2),aes(fill=cur_structure_condition),position = position_nudge(x = 0, y = -.3 ),scale="count",width=0.4,alpha=0.2,color=NA)+
  geom_hline(yintercept=0.25*4+1,linetype="dashed")+
  geom_smooth(data=filter(pX,block_name=="generalization block"),aes(y=fit*4+1,ymax=(fit+se.fit)*4+1,ymin=(fit-se.fit)*4+1,fill=cur_structure_condition),stat="identity")+
  theme_classic(base_size=18)+
  ylab("Probability of \nreward-maximizing choice")+
  scale_color_brewer(
    palette="Set1",
    name="Structure Condition",
    breaks=c("model","emotion"),
    labels=c("Model","Emotion"))+
  scale_fill_brewer(
    palette="Set1",
    name="Structure Condition",
    breaks=c("model","emotion"),
    labels=c("Model","Emotion"))+
  scale_y_discrete(limits=c("0","0.25","0.5","0.75","1"))+
  xlab("Block Trial Number")+
  facet_wrap(~match_condition)+
  theme(legend.position=c(0.39,0.45),legend.title=element_text(size=14),legend.text=element_text(size=12),legend.background=element_rect(fill =NA))
p
ggsave(here(figure_path,"model_prediction_generalization_only.pdf"),width=9,height=6)
# Recenter the model on the learning block
# Same maximal model recentered on the learning block (block_learn: 0 =
# learning, 1 = generalization), so lower-order terms are simple effects
# within the learning block.
m <- glmer(max_reward_choice ~ cur_structure_condition_c*match_condition_c*block_learn*block_trial_number_c+ (1+block_learn*block_trial_number_c|subject)+(1|choiceImage),data=d, family=binomial,glmerControl(optimizer="bobyqa",optCtrl=list(maxfun=100000)))
summary(m)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: max_reward_choice ~ cur_structure_condition_c * match_condition_c *
## block_learn * block_trial_number_c + (1 + block_learn * block_trial_number_c |
## subject) + (1 | choiceImage)
## Data: d
## Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))
##
## AIC BIC logLik deviance df.resid
## 7514.5 7706.7 -3730.2 7460.5 9093
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -37.474 -0.377 0.048 0.364 10.021
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## choiceImage (Intercept) 5.201978 2.28078
## subject (Intercept) 5.297508 2.30163
## block_learn 7.979953 2.82488 -0.62
## block_trial_number_c 0.003516 0.05929 0.77 -0.33
## block_learn:block_trial_number_c 0.004855 0.06968 -0.23 0.64
##
##
##
##
##
## -0.39
## Number of obs: 9120, groups: choiceImage, 248; subject, 95
##
## Fixed effects:
## Estimate
## (Intercept) -0.0466175
## cur_structure_condition_c 0.1770757
## match_condition_c -0.3741725
## block_learn -0.0448020
## block_trial_number_c 0.0578843
## cur_structure_condition_c:match_condition_c -0.5912516
## cur_structure_condition_c:block_learn -1.5072353
## match_condition_c:block_learn 1.6952924
## cur_structure_condition_c:block_trial_number_c -0.0082382
## match_condition_c:block_trial_number_c -0.0157079
## block_learn:block_trial_number_c -0.0008284
## cur_structure_condition_c:match_condition_c:block_learn -1.1138561
## cur_structure_condition_c:match_condition_c:block_trial_number_c -0.0589892
## cur_structure_condition_c:block_learn:block_trial_number_c -0.0437210
## match_condition_c:block_learn:block_trial_number_c 0.0466887
## cur_structure_condition_c:match_condition_c:block_learn:block_trial_number_c -0.0111605
## Std. Error
## (Intercept) 0.2884954
## cur_structure_condition_c 0.4875243
## match_condition_c 0.4872009
## block_learn 0.3086071
## block_trial_number_c 0.0073211
## cur_structure_condition_c:match_condition_c 0.9713482
## cur_structure_condition_c:block_learn 0.6949556
## match_condition_c:block_learn 0.6063002
## cur_structure_condition_c:block_trial_number_c 0.0144065
## match_condition_c:block_trial_number_c 0.0144253
## block_learn:block_trial_number_c 0.0093109
## cur_structure_condition_c:match_condition_c:block_learn 1.3857101
## cur_structure_condition_c:match_condition_c:block_trial_number_c 0.0287637
## cur_structure_condition_c:block_learn:block_trial_number_c 0.0223922
## match_condition_c:block_learn:block_trial_number_c 0.0182662
## cur_structure_condition_c:match_condition_c:block_learn:block_trial_number_c 0.0447686
## z value
## (Intercept) -0.162
## cur_structure_condition_c 0.363
## match_condition_c -0.768
## block_learn -0.145
## block_trial_number_c 7.907
## cur_structure_condition_c:match_condition_c -0.609
## cur_structure_condition_c:block_learn -2.169
## match_condition_c:block_learn 2.796
## cur_structure_condition_c:block_trial_number_c -0.572
## match_condition_c:block_trial_number_c -1.089
## block_learn:block_trial_number_c -0.089
## cur_structure_condition_c:match_condition_c:block_learn -0.804
## cur_structure_condition_c:match_condition_c:block_trial_number_c -2.051
## cur_structure_condition_c:block_learn:block_trial_number_c -1.953
## match_condition_c:block_learn:block_trial_number_c 2.556
## cur_structure_condition_c:match_condition_c:block_learn:block_trial_number_c -0.249
## Pr(>|z|)
## (Intercept) 0.87163
## cur_structure_condition_c 0.71644
## match_condition_c 0.44248
## block_learn 0.88457
## block_trial_number_c 2.65e-15
## cur_structure_condition_c:match_condition_c 0.54273
## cur_structure_condition_c:block_learn 0.03010
## match_condition_c:block_learn 0.00517
## cur_structure_condition_c:block_trial_number_c 0.56743
## match_condition_c:block_trial_number_c 0.27619
## block_learn:block_trial_number_c 0.92910
## cur_structure_condition_c:match_condition_c:block_learn 0.42150
## cur_structure_condition_c:match_condition_c:block_trial_number_c 0.04028
## cur_structure_condition_c:block_learn:block_trial_number_c 0.05088
## match_condition_c:block_learn:block_trial_number_c 0.01059
## cur_structure_condition_c:match_condition_c:block_learn:block_trial_number_c 0.80313
##
## (Intercept)
## cur_structure_condition_c
## match_condition_c
## block_learn
## block_trial_number_c ***
## cur_structure_condition_c:match_condition_c
## cur_structure_condition_c:block_learn *
## match_condition_c:block_learn **
## cur_structure_condition_c:block_trial_number_c
## match_condition_c:block_trial_number_c
## block_learn:block_trial_number_c
## cur_structure_condition_c:match_condition_c:block_learn
## cur_structure_condition_c:match_condition_c:block_trial_number_c *
## cur_structure_condition_c:block_learn:block_trial_number_c .
## match_condition_c:block_learn:block_trial_number_c *
## cur_structure_condition_c:match_condition_c:block_learn:block_trial_number_c
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation matrix not shown by default, as p = 16 > 12.
## Use print(x, correlation=TRUE) or
## vcov(x) if you need it
# Recenter the model on the generalization block
# Same maximal model recentered on the generalization block (block_gen:
# -1 = learning, 0 = generalization), so lower-order terms are simple
# effects within the generalization block.
m <- glmer(max_reward_choice ~ cur_structure_condition_c*match_condition_c*block_gen*block_trial_number_c+ (1+block_gen*block_trial_number_c|subject)+(1|choiceImage),data=d, family=binomial,glmerControl(optimizer="bobyqa",optCtrl=list(maxfun=100000)))
summary(m)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: max_reward_choice ~ cur_structure_condition_c * match_condition_c *
## block_gen * block_trial_number_c + (1 + block_gen * block_trial_number_c |
## subject) + (1 | choiceImage)
## Data: d
## Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))
##
## AIC BIC logLik deviance df.resid
## 7514.5 7706.7 -3730.2 7460.5 9093
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -37.474 -0.377 0.048 0.364 10.021
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## choiceImage (Intercept) 5.201946 2.28078
## subject (Intercept) 5.255104 2.29240
## block_gen 7.979806 2.82486 0.61
## block_trial_number_c 0.005112 0.07150 0.86 0.35
## block_gen:block_trial_number_c 0.004855 0.06968 0.56 0.64 0.65
## Number of obs: 9120, groups: choiceImage, 248; subject, 95
##
## Fixed effects:
## Estimate
## (Intercept) -0.091438
## cur_structure_condition_c -1.330107
## match_condition_c 1.321109
## block_gen -0.044821
## block_trial_number_c 0.057055
## cur_structure_condition_c:match_condition_c -1.704948
## cur_structure_condition_c:block_gen -1.507199
## match_condition_c:block_gen 1.695279
## cur_structure_condition_c:block_trial_number_c -0.051957
## match_condition_c:block_trial_number_c 0.030980
## block_gen:block_trial_number_c -0.000829
## cur_structure_condition_c:match_condition_c:block_gen -1.113944
## cur_structure_condition_c:match_condition_c:block_trial_number_c -0.070145
## cur_structure_condition_c:block_gen:block_trial_number_c -0.043719
## match_condition_c:block_gen:block_trial_number_c 0.046688
## cur_structure_condition_c:match_condition_c:block_gen:block_trial_number_c -0.011161
## Std. Error
## (Intercept) 0.289259
## cur_structure_condition_c 0.489954
## match_condition_c 0.488973
## block_gen 0.308578
## block_trial_number_c 0.008544
## cur_structure_condition_c:match_condition_c 0.977473
## cur_structure_condition_c:block_gen 0.695342
## match_condition_c:block_gen 0.606059
## cur_structure_condition_c:block_trial_number_c 0.016935
## match_condition_c:block_trial_number_c 0.016914
## block_gen:block_trial_number_c 0.009310
## cur_structure_condition_c:match_condition_c:block_gen 1.387967
## cur_structure_condition_c:match_condition_c:block_trial_number_c 0.033862
## cur_structure_condition_c:block_gen:block_trial_number_c 0.022404
## match_condition_c:block_gen:block_trial_number_c 0.018264
## cur_structure_condition_c:match_condition_c:block_gen:block_trial_number_c 0.044806
## z value
## (Intercept) -0.316
## cur_structure_condition_c -2.715
## match_condition_c 2.702
## block_gen -0.145
## block_trial_number_c 6.678
## cur_structure_condition_c:match_condition_c -1.744
## cur_structure_condition_c:block_gen -2.168
## match_condition_c:block_gen 2.797
## cur_structure_condition_c:block_trial_number_c -3.068
## match_condition_c:block_trial_number_c 1.832
## block_gen:block_trial_number_c -0.089
## cur_structure_condition_c:match_condition_c:block_gen -0.803
## cur_structure_condition_c:match_condition_c:block_trial_number_c -2.072
## cur_structure_condition_c:block_gen:block_trial_number_c -1.951
## match_condition_c:block_gen:block_trial_number_c 2.556
## cur_structure_condition_c:match_condition_c:block_gen:block_trial_number_c -0.249
## Pr(>|z|)
## (Intercept) 0.75192
## cur_structure_condition_c 0.00663
## match_condition_c 0.00690
## block_gen 0.88451
## block_trial_number_c 2.43e-11
## cur_structure_condition_c:match_condition_c 0.08112
## cur_structure_condition_c:block_gen 0.03019
## match_condition_c:block_gen 0.00515
## cur_structure_condition_c:block_trial_number_c 0.00216
## match_condition_c:block_trial_number_c 0.06701
## block_gen:block_trial_number_c 0.92905
## cur_structure_condition_c:match_condition_c:block_gen 0.42222
## cur_structure_condition_c:match_condition_c:block_trial_number_c 0.03831
## cur_structure_condition_c:block_gen:block_trial_number_c 0.05100
## match_condition_c:block_gen:block_trial_number_c 0.01058
## cur_structure_condition_c:match_condition_c:block_gen:block_trial_number_c 0.80329
##
## (Intercept)
## cur_structure_condition_c **
## match_condition_c **
## block_gen
## block_trial_number_c ***
## cur_structure_condition_c:match_condition_c .
## cur_structure_condition_c:block_gen *
## match_condition_c:block_gen **
## cur_structure_condition_c:block_trial_number_c **
## match_condition_c:block_trial_number_c .
## block_gen:block_trial_number_c
## cur_structure_condition_c:match_condition_c:block_gen
## cur_structure_condition_c:match_condition_c:block_trial_number_c *
## cur_structure_condition_c:block_gen:block_trial_number_c .
## match_condition_c:block_gen:block_trial_number_c *
## cur_structure_condition_c:match_condition_c:block_gen:block_trial_number_c
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation matrix not shown by default, as p = 16 > 12.
## Use print(x, correlation=TRUE) or
## vcov(x) if you need it
# Not evaluated for v2:
# trying to sort out how to look at individual conditions and blocks
# Structure-condition x trial model over all test trials (default optimizer).
m <- glmer(max_reward_choice ~ cur_structure_condition_c*block_trial_number_c+ (1+block_trial_number_c|subject)+(1|choiceImage),data=d, family=binomial)
summary(m)
# Match-condition x trial x block model.
m <- glmer(max_reward_choice ~ match_condition_c*block_trial_number_c*block_c+ (1+block_trial_number_c|subject)+(1|choiceImage),data=d, family=binomial)
summary(m)
# Match-condition effect within the generalization block only.
m <- glmer(max_reward_choice ~ match_condition_c*block_trial_number_c+ (1+block_trial_number_c|subject)+(1|choiceImage),data=filter(d,block==2), family=binomial)
summary(m)
# Match-condition effect within the learning block only.
m <- glmer(max_reward_choice ~ match_condition_c*block_trial_number_c+ (1+block_trial_number_c|subject)+(1|choiceImage),data=filter(d,block==1), family=binomial)
summary(m) # not really sure why there should be a match effect here... Noise?
## learning block
# m <- glmer(max_reward_choice ~ structure_condition_c*block_trial_number_c+ (1+block_trial_number_c|subject)+(1|choiceImage),data=filter(d,block==1), family=binomial)
# summary(m)
#
# # re-run so I don't have to re-label things
# m <- glmer(max_reward_choice ~ structure_condition*block_trial_number+ (1+block_trial_number|subject)+(1|choiceImage),data=filter(d,block==1), family=binomial)
# summary(m)
#
# sjPlot::plot_model(m, type = "pred", terms = c("block_trial_number", "structure_condition"),
# show.data = T,
# jitter = .01,
# title = "",
# axis.title = c("Trial", "Reward Maximizing Choices"),
# legend.title = "Structure Condition",
# colors = c( "orange3", "green4"),
# auto.label = FALSE)+
# theme_classic(base_size = 14, base_family = "")
# Overall structure x trial model (same model as in the section above).
m <- glmer(max_reward_choice ~ cur_structure_condition_c*block_trial_number_c+ (1+block_trial_number_c|subject)+(1|choiceImage),data=d, family=binomial)
summary(m)
# Structure x trial within the generalization block only.
m <- glmer(max_reward_choice ~ cur_structure_condition_c*block_trial_number_c+ (1+block_trial_number_c|subject)+(1|choiceImage),data=filter(d,block==2), family=binomial)
summary(m)
# Generalization block with the full structure x match x trial interaction
# (bobyqa for convergence).
m <- glmer(max_reward_choice ~ cur_structure_condition_c*match_condition_c*block_trial_number_c+ (1+block_trial_number_c|subject)+(1|choiceImage),data=filter(d,block==2), family=binomial,glmerControl(optimizer="bobyqa",optCtrl=list(maxfun=20000)))
summary(m)
# Predicted probabilities from the generalization-block model.
# NOTE(review): `terms` uses the centered numeric codes (match_condition_c,
# cur_structure_condition_c), so the legend/panels will show -0.5/0.5 rather
# than condition labels — consider refitting on the factor variables if
# labeled predictions are wanted.
sjPlot::plot_model(m, type = "pred", terms = c("block_trial_number_c", "match_condition_c","cur_structure_condition_c"),
                   jitter = .01,
                   title = "Structure Condition",
                   axis.title = c("Trial", "Reward Maximizing Choices"),
                   legend.title = "Match Condition",
                   colors = c( "firebrick", "darkblue")) +
  theme_classic(base_size = 14, base_family = "")